bitkeeper revision 1.1159.1.307 (4182266cAGOQsr2Dkb1NULqhnlOgQg)
author mwilli2@equilibrium.research <mwilli2@equilibrium.research>
Fri, 29 Oct 2004 11:15:56 +0000 (11:15 +0000)
committer mwilli2@equilibrium.research <mwilli2@equilibrium.research>
Fri, 29 Oct 2004 11:15:56 +0000 (11:15 +0000)
Thread a linked list of ballooned pages through the mem_map array.
Balloon driver should now work under migration and suspend / resume.

linux-2.6.9-xen-sparse/drivers/xen/balloon/balloon.c

index b9ebda1785de1797df3e62cdccca34bbf92750d3..1e2288a2e14945f6c847f8c157697dce20122e37 100644 (file)
@@ -60,6 +60,8 @@
 #include <asm/uaccess.h>
 #include <asm/tlb.h>
 
+#include <linux/list.h>
+
 /* USER DEFINES -- THESE SHOULD BE COPIED TO USER-SPACE TOOLS */
 #define USER_INFLATE_BALLOON  1   /* return mem to hypervisor */
 #define USER_DEFLATE_BALLOON  2   /* claim mem from hypervisor */
@@ -74,11 +76,60 @@ static struct proc_dir_entry *balloon_pde;
 unsigned long credit;
 static unsigned long current_pages, most_seen_pages;
 
-/*
- * Dead entry written into balloon-owned entries in the PMT.
- * It is deliberately different to INVALID_P2M_ENTRY.
- */
-#define DEAD 0xdead1234
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+
+/* Head of the list of ballooned pages */
+struct page *ball_pg_hd = NULL;
+
+void add_ballooned_page(unsigned long pfn)
+{
+    struct page *p = mem_map + pfn;
+    
+    p->private = (unsigned long)ball_pg_hd;
+    ball_pg_hd = p;
+}
+
+struct page *rem_ballooned_page(void)
+{
+    if ( ball_pg_hd != NULL )
+    {
+        struct page *ret = ball_pg_hd;
+        ball_pg_hd = (struct page *)ball_pg_hd->private;
+        return ret;
+    }
+    else
+        return NULL;
+}   
+
+#else
+/* List of ballooned pages, threaded through the mem_map array. */
+LIST_HEAD(ballooned_pages);
+
+void add_ballooned_page(unsigned long pfn)
+{
+    struct page *p = mem_map + pfn;
+
+    list_add(&p->list, &ballooned_pages);
+}
+
+struct page *rem_ballooned_page(void)
+{
+    if(!list_empty(&ballooned_pages))
+    {
+        struct list_head *next;
+        struct page *ret;
+
+        next = ballooned_pages.next;
+        ret = list_entry(next, struct page, list);
+        list_del(next);
+
+        return ret;
+    }
+    else
+        return NULL;
+}
+
+#endif
 
 static inline pte_t *get_ptep(unsigned long addr)
 {
@@ -101,6 +152,7 @@ static inline pte_t *get_ptep(unsigned long addr)
 
 /* Main function for relinquishing memory. */
 static unsigned long inflate_balloon(unsigned long num_pages)
+
 {
     unsigned long *parray;
     unsigned long *currp;
@@ -130,6 +182,7 @@ static unsigned long inflate_balloon(unsigned long num_pages)
             currp = parray;
             for ( j = 0; j < i; j++, currp++ )
                 __free_page((struct page *) (mem_map + *currp));
+
             ret = -EFAULT;
             goto cleanup;
         }
@@ -156,7 +209,10 @@ static unsigned long inflate_balloon(unsigned long num_pages)
             kunmap(&mem_map[*currp]);
         }
 #endif
-        phys_to_machine_mapping[*currp] = DEAD;
+
+        add_ballooned_page(*currp);
+
+        phys_to_machine_mapping[*currp] = INVALID_P2M_ENTRY;
         *currp = mfn;
     }
 
@@ -195,28 +251,38 @@ static unsigned long process_returned_pages(unsigned long * parray,
      * incorporated here.
      */
      
-    unsigned long tot_pages = most_seen_pages;   
     unsigned long * curr = parray;
     unsigned long num_installed;
-    unsigned long i;
+
+    struct page *page;
 
     num_installed = 0;
-    for ( i = 0; (i < tot_pages) && (num_installed < num); i++ )
+    while ( (page = rem_ballooned_page()) != NULL )
     {
-        if ( phys_to_machine_mapping[i] == DEAD )
-        {
-            phys_to_machine_mapping[i] = *curr;
-            queue_machphys_update(*curr, i);
-            if (i<max_low_pfn)
-              queue_l1_entry_update(
-                get_ptep((unsigned long)__va(i << PAGE_SHIFT)),
-                ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
+        unsigned long pfn;
+
+        if ( num_installed == num )
+            break;
 
-            __free_page(mem_map + i);
+        pfn = page - mem_map;
 
-            curr++;
-            num_installed++;
+        if(phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY)
+        {
+            printk("BUG: Tried to unballoon existing page!");
+            BUG();
         }
+
+        phys_to_machine_mapping[pfn] = *curr;
+        queue_machphys_update(*curr, pfn);
+        if (pfn<max_low_pfn)
+            queue_l1_entry_update(
+                get_ptep((unsigned long)__va(pfn << PAGE_SHIFT)),
+                ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
+        
+        __free_page(mem_map + pfn);
+
+        curr++;
+        num_installed++;
     }
 
     return num_installed;